[IA64] copy_from/to_guest
author: awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Mon, 14 Aug 2006 20:21:21 +0000 (14:21 -0600)
committer: awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Mon, 14 Aug 2006 20:21:21 +0000 (14:21 -0600)
This patch fixes the copy_from/to_guest problem.
As Akio reported, a modularised netback brings dom0 down.

The following sequence of calls happens in gnttab_transfer() in
xen/common/grant_table.c:

  gnttab_transfer()
   => steal_page()
     => assign_domain_page_cmpxchg_rel()
       => domain_page_flush()
         => domain_flush_vtlb_all()  // all TLBs are flushed
   ...
   => __copy_to_guest_offset()               // always fail to copy

The built-in netback module has no problem because it uses TR-pinned
data.  But a modularised one lies outside the TR mapping, so the
copy_from/to_guest issue must be solved in order to modularise drivers.

Signed-off-by: Kouya SHIMURA <kouya@jp.fujitsu.com>
linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c
linux-2.6-xen-sparse/include/asm-ia64/hypercall.h
xen/arch/ia64/xen/hypercall.c
xen/arch/ia64/xen/vcpu.c
xen/include/asm-ia64/domain.h
xen/include/asm-ia64/uaccess.h

index 00efd9a1f71a2d6a5a6179e57c60a3a9faf26cae..10dc6a6477d2a33b99eb05b4836a2f08d7c066ad 100644 (file)
@@ -371,6 +371,8 @@ gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop)
 int
 HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
 {
+       __u64 va1, va2, pa1, pa2;
+
        if (cmd == GNTTABOP_map_grant_ref) {
                unsigned int i;
                for (i = 0; i < count; i++) {
@@ -378,8 +380,29 @@ HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
                                (struct gnttab_map_grant_ref*)uop + i);
                }
        }
-
-       return ____HYPERVISOR_grant_table_op(cmd, uop, count);
+       va1 = (__u64)uop & PAGE_MASK;
+       pa1 = pa2 = 0;
+       if ((REGION_NUMBER(va1) == 5) &&
+           ((va1 - KERNEL_START) >= KERNEL_TR_PAGE_SIZE)) {
+               pa1 = ia64_tpa(va1);
+               if (cmd <= GNTTABOP_transfer) {
+                       static uint32_t uop_size[GNTTABOP_transfer + 1] = {
+                               sizeof(struct gnttab_map_grant_ref),
+                               sizeof(struct gnttab_unmap_grant_ref),
+                               sizeof(struct gnttab_setup_table),
+                               sizeof(struct gnttab_dump_table),
+                               sizeof(struct gnttab_transfer),
+                       };
+                       va2 = (__u64)uop + (uop_size[cmd] * count) - 1;
+                       va2 &= PAGE_MASK;
+                       if (va1 != va2) {
+                               /* maximum size of uop is 2pages */
+                               BUG_ON(va2 > va1 + PAGE_SIZE);
+                               pa2 = ia64_tpa(va2);
+                       }
+               }
+       }
+       return ____HYPERVISOR_grant_table_op(cmd, uop, count, pa1, pa2);
 }
 EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
 
index e2fe103e65eb951bc083187844da50b848a4e52f..cc43e9bf1510e17b35e5a5037d5e59935a627d63 100644 (file)
@@ -275,9 +275,10 @@ HYPERVISOR_physdev_op(
 //XXX __HYPERVISOR_grant_table_op is used for this hypercall constant.
 static inline int
 ____HYPERVISOR_grant_table_op(
-    unsigned int cmd, void *uop, unsigned int count)
+    unsigned int cmd, void *uop, unsigned int count,
+    unsigned long pa1, unsigned long pa2)
 {
-    return _hypercall3(int, grant_table_op, cmd, uop, count);
+    return _hypercall5(int, grant_table_op, cmd, uop, count, pa1, pa2);
 }
 
 int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
index 46d279b5b28437e5d788196e4c2d12dd8c420a9d..2c000246868bbcec26a2cc6e5154991899673ed9 100644 (file)
@@ -105,6 +105,19 @@ static IA64FAULT
 xen_hypercall (struct pt_regs *regs)
 {
        uint32_t cmd = (uint32_t)regs->r2;
+       struct vcpu *v = current;
+
+       if (cmd == __HYPERVISOR_grant_table_op) {
+               XEN_GUEST_HANDLE(void) uop;
+
+               v->arch.hypercall_param.va = regs->r15;
+               v->arch.hypercall_param.pa1 = regs->r17;
+               v->arch.hypercall_param.pa2 = regs->r18;
+               set_xen_guest_handle(uop, (void *)regs->r15);
+               regs->r8 = do_grant_table_op(regs->r14, uop, regs->r16);
+               v->arch.hypercall_param.va = 0;
+               return IA64_NO_FAULT;
+       }
 
        if (cmd < NR_hypercalls) {
                perfc_incra(hypercalls, cmd);
index acbc492686db47fa2425e7e0ddae5be57cf403f1..39f9b1d8ed13f053c9203edbb05e3a7586aded8e 100644 (file)
@@ -2215,3 +2215,28 @@ IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 log_range)
 
        return IA64_NO_FAULT;
 }
+
+int ia64_map_hypercall_param(void)
+{
+       struct vcpu *v = current;
+       struct domain *d = current->domain;
+       u64 vaddr = v->arch.hypercall_param.va & PAGE_MASK;
+       volatile pte_t* pte;
+
+       if (v->arch.hypercall_param.va == 0)
+               return FALSE;
+       pte = lookup_noalloc_domain_pte(d, v->arch.hypercall_param.pa1);
+       if (!pte || !pte_present(*pte))
+               return FALSE;
+       vcpu_itc_no_srlz(v, 2, vaddr, pte_val(*pte), -1UL, PAGE_SHIFT);
+       if (v->arch.hypercall_param.pa2) {
+               vaddr += PAGE_SIZE;
+               pte = lookup_noalloc_domain_pte(d, v->arch.hypercall_param.pa2);
+               if (pte && pte_present(*pte)) {
+                       vcpu_itc_no_srlz(v, 2, vaddr, pte_val(*pte),
+                                        -1UL, PAGE_SHIFT);
+               }
+       }
+       ia64_srlz_d();
+       return TRUE;
+}
index 9c565cff9f24d7272765298dd587afcfae5a0b1f..55882fcfabbd5d57b5d6719d20b10a481cce6ecd 100644 (file)
@@ -142,6 +142,12 @@ struct arch_domain {
     (sizeof(vcpu_info_t) * (v)->vcpu_id + \
     offsetof(vcpu_info_t, evtchn_upcall_mask))
 
+struct hypercall_param {
+    unsigned long va;
+    unsigned long pa1;
+    unsigned long pa2;
+};
+
 struct arch_vcpu {
     /* Save the state of vcpu.
        This is the first entry to speed up accesses.  */
@@ -185,6 +191,9 @@ struct arch_vcpu {
     char irq_new_pending;
     char irq_new_condition;    // vpsr.i/vtpr change, check for pending VHPI
     char hypercall_continuation;
+
+    struct hypercall_param hypercall_param;  // used to remap a hypercall param
+
     //for phycial  emulation
     unsigned long old_rsc;
     int mode_flags;
index 6cf5158954ecad092c33fe31892ffddb158b60b3..17e2a17f9d71fc7c66d705db4c768214ec34f187 100644 (file)
@@ -211,16 +211,30 @@ extern void __put_user_unknown (void);
 extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
                                               unsigned long count);
 
+extern int ia64_map_hypercall_param(void);
+
 static inline unsigned long
 __copy_to_user (void __user *to, const void *from, unsigned long count)
 {
-       return __copy_user(to, (void __user *) from, count);
+       unsigned long len;
+       len = __copy_user(to, (void __user *)from, count);
+       if (len == 0)
+               return 0;
+       if (ia64_map_hypercall_param())
+               len = __copy_user(to, (void __user *)from, count); /* retry */
+       return len;
 }
 
 static inline unsigned long
 __copy_from_user (void *to, const void __user *from, unsigned long count)
 {
-       return __copy_user((void __user *) to, from, count);
+       unsigned long len;
+       len = __copy_user((void __user *)to, from, count);
+       if (len == 0)
+               return 0;
+       if (ia64_map_hypercall_param())
+               len = __copy_user((void __user *) to, from, count); /* retry */
+       return len;
 }
 
 #define __copy_to_user_inatomic                __copy_to_user